bitkeeper revision 1.22.2.3 (3e42f257DQUNW5mSYvOQT0Fq2ArFEw)
author kaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>
Thu, 6 Feb 2003 23:40:07 +0000 (23:40 +0000)
committer kaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>
Thu, 6 Feb 2003 23:40:07 +0000 (23:40 +0000)
sched.h, memory.c, domain.c, dom0_ops.c, ioremap.c, entry.S:
  Fix bug in domain-memory allocation. Per-domain page lists now have a proper Linux-style 'list_head'.

xen-2.4.16/arch/i386/entry.S
xen-2.4.16/arch/i386/ioremap.c
xen-2.4.16/common/dom0_ops.c
xen-2.4.16/common/domain.c
xen-2.4.16/common/memory.c
xen-2.4.16/include/xeno/sched.h

index 103e9e088897b646c7fb8dca12477db59ebc43c0..910e1f19fcbda63c5f45fce5f0a31ae0ddcf794f 100644 (file)
@@ -102,7 +102,7 @@ PROCESSOR       =  0
 STATE           =  4
 HYP_EVENTS      =  8
 DOMAIN          = 12        
-SHARED_INFO     = 24
+SHARED_INFO     = 16
 
 /* Offsets in shared_info_t */
 EVENTS          =  0
index 16d677b79d360f12b0cdb71bd704ac76439bd300..4ed7ba438d373ae175e8a7df2969d20fd70ca53e 100644 (file)
@@ -15,8 +15,8 @@
 
 static unsigned long remap_base = 0;
 
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY)
 
 #define PAGE_ALIGN(addr)    (((addr)+PAGE_SIZE-1)&PAGE_MASK)
 
index 735f9f0e171d3224b76986a59a8f3b8c218b6626..c2eae723c09ffd6415ef5d22bbeb7eb535d7b0b6 100644 (file)
@@ -32,26 +32,26 @@ static unsigned int get_domnr(void)
 
 static void build_page_list(struct task_struct *p)
 {
-    unsigned long * list;
+    unsigned long *list;
     unsigned long curr;
-    unsigned long page;
     struct list_head *list_ent;
 
-    list = (unsigned long *)map_domain_mem(p->pg_head << PAGE_SHIFT);
-    curr = page = p->pg_head;
-    do {
-        *list++ = page;
-        list_ent = frame_table[page].list.next;
-        page = list_entry(list_ent, struct pfn_info, list) - frame_table;
-        if( !((unsigned long)list & (PAGE_SIZE-1)) )
+    curr = list_entry(p->pg_head.next, struct pfn_info, list) - frame_table;
+    list = (unsigned long *)map_domain_mem(curr << PAGE_SHIFT);
+
+    list_for_each(list_ent, &p->pg_head)
+    {
+        *list++ = list_entry(list_ent, struct pfn_info, list) - frame_table;
+
+        if( ((unsigned long)list & ~PAGE_MASK) == 0 )
         {
-            list_ent = frame_table[curr].list.next;
-            curr = list_entry(list_ent, struct pfn_info, list) - frame_table;
+            struct list_head *ent = frame_table[curr].list.next;
+            curr = list_entry(ent, struct pfn_info, list) - frame_table;
             unmap_domain_mem(list-1);
             list = (unsigned long *)map_domain_mem(curr << PAGE_SHIFT);
         }
     }
-    while ( page != p->pg_head );
+
     unmap_domain_mem(list);
 }
     
@@ -97,37 +97,20 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
         pro = (pro+1) % smp_num_cpus;
         p->processor = pro;
 
-        /* if we are not booting dom 0 than only mem 
-         * needs to be allocated
-         */
-        if(dom != 0){
-
-            if(alloc_new_dom_mem(p, op.u.newdomain.memory_kb) != 0){
-                ret = -1;
-                break;
-            }
-            build_page_list(p);
-            
-            ret = p->domain;
-
-            op.u.newdomain.domain = ret;
-            op.u.newdomain.pg_head = p->pg_head;
-            copy_to_user(u_dom0_op, &op, sizeof(op));
+        if ( dom == 0 ) BUG();
 
-            break;
-        }
+        ret = alloc_new_dom_mem(p, op.u.newdomain.memory_kb);
+        if ( ret != 0 ) break;
 
-        /* executed only in case of domain 0 */
-        ret = setup_guestos(p, &op.u.newdomain);    /* Load guest OS into @p */
-        if ( ret != 0 ) 
-        {
-            p->state = TASK_DYING;
-            release_task(p);
-            break;
-        }
-        wake_up(p);          /* Put @p on runqueue */
-        reschedule(p);       /* Force a scheduling decision on @p's CPU */
+        build_page_list(p);
+        
         ret = p->domain;
+        
+        op.u.newdomain.domain = ret;
+        op.u.newdomain.pg_head = 
+            list_entry(p->pg_head.next, struct pfn_info, list) -
+            frame_table;
+        copy_to_user(u_dom0_op, &op, sizeof(op));
     }
     break;
 
index 2e502a4d75a9049e34f54f05577d4e9f3b10ced6..61323d764db5ba66a82843dee2935f8f480034d3 100644 (file)
@@ -14,8 +14,8 @@
 #include <asm/msr.h>
 #include <xeno/multiboot.h>
 
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
 
 extern int nr_mods;
 extern module_t *mod;
@@ -69,7 +69,8 @@ struct task_struct *do_newdomain(void)
      */
     p->blk_ring_base = (blk_ring_t *)(p->shared_info + 1);
     p->net_ring_base = (net_ring_t *)(p->blk_ring_base + 1);
-    p->pg_head = p->tot_pages = 0;
+    INIT_LIST_HEAD(&p->pg_head);
+    p->tot_pages = 0;
     write_lock_irqsave(&tasklist_lock, flags);
     SET_LINKS(p);
     write_unlock_irqrestore(&tasklist_lock, flags);
@@ -243,6 +244,8 @@ long kill_other_domain(unsigned int dom)
 /* Release resources belonging to task @p. */
 void release_task(struct task_struct *p)
 {
+    struct list_head *list_ent, *tmp;
+
     ASSERT(!__task_on_runqueue(p));
     ASSERT(p->state == TASK_DYING);
     ASSERT(!p->has_cpu);
@@ -261,17 +264,15 @@ void release_task(struct task_struct *p)
     }
     if ( p->mm.perdomain_pt ) free_page((unsigned long)p->mm.perdomain_pt);
     free_page((unsigned long)p->shared_info);
-    if ( p->tot_pages != 0 )
+
+    list_for_each_safe(list_ent, tmp, &p->pg_head)
     {
-        /* Splice domain's pages into the free list. */
-        struct list_head *first = &frame_table[p->pg_head].list;
-        struct list_head *last  = first->prev;
-        free_list.next->prev = last;
-        last->next = free_list.next;
-        free_list.next = first;
-        first->prev = &free_list;            
-        free_pfns += p->tot_pages;
+        struct pfn_info *pf = list_entry(list_ent, struct pfn_info, list);
+        pf->type_count = pf->tot_count = pf->flags = 0;
+        list_del(list_ent);
+        list_add(list_ent, &free_list);
     }
+
     free_task_struct(p);
 }
 
@@ -350,7 +351,7 @@ asmlinkage void schedule(void)
 unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
 {
     struct list_head *temp;
-    struct pfn_info *pf, *pf_head;
+    struct pfn_info *pf;
     unsigned int alloc_pfns;
     unsigned int req_pages;
 
@@ -358,33 +359,18 @@ unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
     req_pages = kbytes >> (PAGE_SHIFT - 10);
 
     /* is there enough mem to serve the request? */   
-    if(req_pages > free_pfns)
-        return -1;
+    if ( req_pages > free_pfns ) return -1;
     
     /* allocate pages and build a thread through frame_table */
     temp = free_list.next;
-
-    /* allocate first page */
-    pf = pf_head = list_entry(temp, struct pfn_info, list);
-    pf->flags |= p->domain;
-    temp = temp->next;
-    list_del(&pf->list);
-    INIT_LIST_HEAD(&pf->list);
-    p->pg_head = pf - frame_table;
-    pf->type_count = pf->tot_count = 0;
-    free_pfns--;
-
-    /* allocate the rest */
-    for ( alloc_pfns = req_pages - 1; alloc_pfns; alloc_pfns-- )
+    for ( alloc_pfns = 0; alloc_pfns < req_pages; alloc_pfns++ )
     {
         pf = list_entry(temp, struct pfn_info, list);
         pf->flags |= p->domain;
+        pf->type_count = pf->tot_count = 0;
         temp = temp->next;
         list_del(&pf->list);
-
-        list_add_tail(&pf->list, &pf_head->list);
-        pf->type_count = pf->tot_count = 0;
-
+        list_add_tail(&pf->list, &p->pg_head);
         free_pfns--;
     }
     
@@ -532,11 +518,12 @@ int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
 static unsigned long alloc_page_from_domain(unsigned long * cur_addr, 
     unsigned long * index)
 {
-    struct list_head *ent = frame_table[*cur_addr >> PAGE_SHIFT].list.prev;
+    unsigned long ret = *cur_addr;
+    struct list_head *ent = frame_table[ret >> PAGE_SHIFT].list.prev;
     *cur_addr = list_entry(ent, struct pfn_info, list) - frame_table;
     *cur_addr <<= PAGE_SHIFT;
     (*index)--;    
-    return *cur_addr;
+    return ret;
 }
 
 /* setup_guestos is used for building dom0 solely. other domains are built in
@@ -578,7 +565,9 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
     }
 
     if ( alloc_new_dom_mem(p, params->memory_kb) ) return -ENOMEM;
-    alloc_address = p->pg_head << PAGE_SHIFT;
+    alloc_address = list_entry(p->pg_head.prev, struct pfn_info, list) -
+        frame_table;
+    alloc_address <<= PAGE_SHIFT;
     alloc_index = p->tot_pages;
 
     if ( (mod[nr_mods-1].mod_end-mod[0].mod_start) > 
@@ -615,7 +604,9 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
      */
 
     l2tab += l2_table_offset(virt_load_address);
-    cur_address = p->pg_head << PAGE_SHIFT;
+    cur_address = list_entry(p->pg_head.next, struct pfn_info, list) -
+        frame_table;
+    cur_address <<= PAGE_SHIFT;
     for ( count = 0; count < p->tot_pages + 1; count++ )
     {
         if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
@@ -647,7 +638,9 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
     unmap_domain_mem(l1start);
 
     /* pages that are part of page tables must be read only */
-    cur_address = p->pg_head << PAGE_SHIFT;
+    cur_address = list_entry(p->pg_head.next, struct pfn_info, list) -
+        frame_table;
+    cur_address <<= PAGE_SHIFT;
     for ( count = 0; count < alloc_index; count++ ) 
     {
         list_ent = frame_table[cur_address >> PAGE_SHIFT].list.next;
index f8427e6df5e6061abbd72ba8e6381952821c6d2d..8c75f08dbb12194d4bad9da0817854898e52a785 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/domain_page.h>
 
-#if 1
+#if 0
 #define MEM_LOG(_f, _a...) printk("DOM%d: (file=memory.c, line=%d) " _f "\n", current->domain, __LINE__, ## _a )
 #else
 #define MEM_LOG(_f, _a...) ((void)0)
index 7afc6d9e1d9de70ca139f0981dbbd1b4bf7f6864..6f2863e8a557d4b190f45503fe6644568f94357a 100644 (file)
@@ -63,17 +63,12 @@ struct task_struct {
     int state, hyp_events;
     unsigned int domain;
 
-    /* index into frame_table threading pages belonging to this
-     * domain together. these are placed at the top of the structure
-     * to avoid nasty padding for various kernel structs when using
-     * task_struct in user space
-     */
-    unsigned long pg_head;
-    unsigned int tot_pages;
-
     /* An unsafe pointer into a shared data area. */
     shared_info_t *shared_info;
     
+    struct list_head pg_head;
+    unsigned int tot_pages;
+
     /* Pointer to this guest's virtual interfaces. */
     /* network */
     net_ring_t *net_ring_base;